int active_domains[MAX_OPROF_DOMAINS];
int active_ready[MAX_OPROF_DOMAINS];
-unsigned int adomains = 0;
-unsigned int activated = 0;
-struct domain *primary_profiler = NULL;
+unsigned int adomains;
+unsigned int activated;
+struct domain *primary_profiler;
int xenoprof_state = XENOPROF_IDLE;
-u64 total_samples = 0;
-u64 invalid_buffer_samples = 0;
-u64 corrupted_buffer_samples = 0;
-u64 lost_samples = 0;
-u64 active_samples = 0;
-u64 idle_samples = 0;
-u64 others_samples = 0;
+u64 total_samples;
+u64 invalid_buffer_samples;
+u64 corrupted_buffer_samples;
+u64 lost_samples;
+u64 active_samples;
+u64 idle_samples;
+u64 others_samples;
extern int nmi_init(int *num_events, int *is_primary, char *cpu_type);
int is_active(struct domain *d)
{
- xenoprof_t *x = d->xenoprof;
- if ( x )
- {
- if ( x->domain_type == XENOPROF_DOMAIN_ACTIVE )
- return 1;
- else
- return 0;
- }
- else
- return 0;
+ struct xenoprof *x = d->xenoprof;
+ return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_ACTIVE));
}
int is_profiled(struct domain *d)
active_samples = 0;
idle_samples = 0;
others_samples = 0;
-
- return;
}
static void xenoprof_reset_buf(struct domain *d)
{
int j;
- xenoprof_buf_t *buf;
+ struct xenoprof_buf *buf;
- if ( !d->xenoprof )
+ if ( d->xenoprof == NULL )
{
- printk("xenoprof_reset_buf: ERROR - Unexpected Xenoprof NULL pointer \n");
+ printk("xenoprof_reset_buf: ERROR - Unexpected "
+ "Xenoprof NULL pointer \n");
return;
}
- for ( j=0; j<MAX_VIRT_CPUS; j++ )
+ for ( j = 0; j < MAX_VIRT_CPUS; j++ )
{
buf = d->xenoprof->vcpu[j].buffer;
- if ( buf )
+ if ( buf != NULL )
{
buf->event_head = 0;
buf->event_tail = 0;
int active_index(struct domain *d)
{
- int i;
- int id;
+ int i, id = d->domain_id;
- id = d->domain_id;
- for ( i=0; i<adomains; i++ )
+ for ( i = 0; i < adomains; i++ )
if ( active_domains[i] == id )
- {
return i;
- }
+
return -1;
}
int set_active(struct domain *d)
{
int ind;
- xenoprof_t *x;
+ struct xenoprof *x;
ind = active_index(d);
- if ( ind <0 )
+ if ( ind < 0 )
return -EPERM;
x = d->xenoprof;
- if ( x )
- {
- x->domain_ready = 1;
- x->domain_type = XENOPROF_DOMAIN_ACTIVE;
- active_ready[ind] = 1;
- activated++;
- return 0;
- }
- else
+ if ( x == NULL )
return -EPERM;
+
+ x->domain_ready = 1;
+ x->domain_type = XENOPROF_DOMAIN_ACTIVE;
+ active_ready[ind] = 1;
+ activated++;
+
+ return 0;
}
int reset_active(struct domain *d)
{
int ind;
- xenoprof_t *x;
+ struct xenoprof *x;
ind = active_index(d);
- if ( ind <0 )
+ if ( ind < 0 )
return -EPERM;
x = d->xenoprof;
- if ( x )
- {
- x->domain_ready = 0;
- x->domain_type = XENOPROF_DOMAIN_IGNORED;
- active_ready[ind] = 0;
- activated--;
- if ( activated <= 0 )
- adomains = 0;
- return 0;
- }
- else
+ if ( x == NULL )
return -EPERM;
+
+ x->domain_ready = 0;
+ x->domain_type = XENOPROF_DOMAIN_IGNORED;
+ active_ready[ind] = 0;
+ activated--;
+ if ( activated <= 0 )
+ adomains = 0;
+
+ return 0;
}
int set_active_domains(int num)
int i;
struct domain *d;
- /* reset any existing active domains from previous runs */
- for ( i=0; i<adomains; i++ )
+ /* Reset any existing active domains from previous runs. */
+ for ( i = 0; i < adomains; i++ )
{
if ( active_ready[i] )
{
d = find_domain_by_id(active_domains[i]);
- if ( d )
+ if ( d != NULL )
{
reset_active(d);
put_domain(d);
}
}
- adomains=num;
+ adomains = num;
/* Add primary profiler to list of active domains if not there yet */
primary = active_index(primary_profiler);
if ( primary == -1 )
{
-        /* return if there is no space left on list */
+        /* Return if there is no space left on the list. */
if ( num >= MAX_OPROF_DOMAINS )
return -E2BIG;
- else
- {
- active_domains[num] = primary_profiler->domain_id;
- num++;
- }
+ active_domains[num] = primary_profiler->domain_id;
+ num++;
}
adomains = num;
activated = 0;
- for ( i=0; i<adomains; i++ )
- {
+ for ( i = 0; i < adomains; i++ )
active_ready[i] = 0;
- }
return 0;
}
-void xenoprof_log_event(struct vcpu *vcpu, unsigned long eip, int mode, int event)
+void xenoprof_log_event(
+ struct vcpu *vcpu, unsigned long eip, int mode, int event)
{
- xenoprof_vcpu_t *v;
- xenoprof_buf_t *buf;
+ struct xenoprof_vcpu *v;
+ struct xenoprof_buf *buf;
int head;
int tail;
int size;
/* Count samples in idle separate from other unmonitored domains */
if ( !is_profiled(vcpu->domain) )
{
- others_samples++;
- return;
+ others_samples++;
+ return;
}
v = &vcpu->domain->xenoprof->vcpu[vcpu->vcpu_id];
/* Sanity check. Should never happen */
- if ( !v->buffer )
+ if ( v->buffer == NULL )
{
invalid_buffer_samples++;
return;
int i, order;
/* allocate pages to store sample buffer shared with domain */
- order = get_order_from_pages(npages);
- rawbuf = alloc_xenheap_pages(order);
- if( rawbuf == NULL )
+ order = get_order_from_pages(npages);
+ rawbuf = alloc_xenheap_pages(order);
+ if ( rawbuf == NULL )
{
printk("alloc_xenoprof_buf(): memory allocation failed\n");
return 0;
}
/* Share pages so that kernel can map it */
- for ( i=0; i<npages; i++ )
- {
- share_xen_page_with_guest(virt_to_page(rawbuf + i * PAGE_SIZE),
- d, XENSHARE_writable);
- }
+ for ( i = 0; i < npages; i++ )
+ share_xen_page_with_guest(
+ virt_to_page(rawbuf + i * PAGE_SIZE),
+ d, XENSHARE_writable);
return rawbuf;
}
int nvcpu, npages, bufsize, max_bufsize;
int i;
- d->xenoprof = xmalloc(xenoprof_t);
+ d->xenoprof = xmalloc(struct xenoprof);
- if ( !d->xenoprof )
+ if ( d->xenoprof == NULL )
{
printk ("alloc_xenoprof_struct(): memory "
"allocation (xmalloc) failed\n");
memset(d->xenoprof, 0, sizeof(*d->xenoprof));
nvcpu = 0;
- for_each_vcpu(d, v)
+ for_each_vcpu ( d, v )
nvcpu++;
/* reduce buffer size if necessary to limit pages allocated */
- bufsize = sizeof(xenoprof_buf_t) +
+ bufsize = sizeof(struct xenoprof_buf) +
(max_samples - 1) * sizeof(struct event_log);
max_bufsize = (MAX_OPROF_SHARED_PAGES * PAGE_SIZE) / nvcpu;
if ( bufsize > max_bufsize )
{
bufsize = max_bufsize;
- max_samples = ( (max_bufsize - sizeof(xenoprof_buf_t)) /
+ max_samples = ( (max_bufsize - sizeof(struct xenoprof_buf)) /
sizeof(struct event_log) ) + 1;
}
npages = (nvcpu * bufsize - 1) / PAGE_SIZE + 1;
d->xenoprof->rawbuf = alloc_xenoprof_buf(d, npages);
- if ( !d->xenoprof->rawbuf )
+ if ( d->xenoprof->rawbuf == NULL )
{
xfree(d->xenoprof);
d->xenoprof = NULL;
d->xenoprof->domain_type = XENOPROF_DOMAIN_IGNORED;
/* Update buffer pointers for active vcpus */
- i=0;
- for_each_vcpu(d, v)
+ i = 0;
+ for_each_vcpu ( d, v )
{
d->xenoprof->vcpu[v->vcpu_id].event_size = max_samples;
d->xenoprof->vcpu[v->vcpu_id].buffer =
- (xenoprof_buf_t *)&d->xenoprof->rawbuf[i * bufsize];
+ (struct xenoprof_buf *)&d->xenoprof->rawbuf[i * bufsize];
d->xenoprof->vcpu[v->vcpu_id].buffer->event_size = max_samples;
d->xenoprof->vcpu[v->vcpu_id].buffer->vcpu_id = v->vcpu_id;
void free_xenoprof_pages(struct domain *d)
{
- xenoprof_t *x;
+ struct xenoprof *x;
int order;
x = d->xenoprof;
+ if ( x == NULL )
+ return;
- if ( x )
+ if ( x->rawbuf != NULL )
{
- if ( x->rawbuf )
- {
- order = get_order_from_pages(x->npages);
- free_xenheap_pages(x->rawbuf, order);
- }
- xfree(x);
- d->xenoprof = NULL;
+ order = get_order_from_pages(x->npages);
+ free_xenheap_pages(x->rawbuf, order);
}
+
+ xfree(x);
+ d->xenoprof = NULL;
}
int xenoprof_init(int max_samples, xenoprof_init_result_t *init_result)
if ( ret < 0 )
goto err;
- /* we allocate xenoprof struct and buffers only at first time
- xenoprof_init is called. Memory is then kept until domain is destroyed */
- if ( !d->xenoprof )
- {
- if ( (ret = alloc_xenoprof_struct(d, max_samples)) < 0 )
- goto err;
- }
+    /*
+     * We allocate the xenoprof struct and buffers only the first time
+     * xenoprof_init is called. The memory is then kept until the domain
+     * is destroyed.
+     */
+ if ( (d->xenoprof == NULL) &&
+ ((ret = alloc_xenoprof_struct(d, max_samples)) < 0) )
+ goto err;
xenoprof_reset_buf(d);
return ret;
}
-#define PRIV_OP(op) ( (op == XENOPROF_set_active) \
+#define PRIV_OP(op) ( (op == XENOPROF_set_active) \
|| (op == XENOPROF_reserve_counters) \
- || (op == XENOPROF_setup_events) \
- || (op == XENOPROF_start) \
- || (op == XENOPROF_stop) \
+ || (op == XENOPROF_setup_events) \
+ || (op == XENOPROF_start) \
+ || (op == XENOPROF_stop) \
|| (op == XENOPROF_release_counters) \
|| (op == XENOPROF_shutdown))
{
int ret = 0;
- if ( PRIV_OP(op) && current->domain != primary_profiler )
+ if ( PRIV_OP(op) && (current->domain != primary_profiler) )
{
printk("xenoprof: dom %d denied privileged operation %d\n",
current->domain->domain_id, op);
if ( xenoprof_state != XENOPROF_COUNTERS_RESERVED )
return -EPERM;
if ( adomains == 0 )
- {
set_active_domains(0);
- }
if ( copy_from_user((void *)&counter_config, (void *)arg1,
arg2 * sizeof(struct op_counter_config)) )
break;
case XENOPROF_start:
+ ret = -EPERM;
if ( (xenoprof_state == XENOPROF_READY) &&
(activated == adomains) )
- {
ret = nmi_start();
- }
- else
- ret= -EPERM;
- if ( !ret )
+ if ( ret == 0 )
xenoprof_state = XENOPROF_PROFILING;
break;
break;
case XENOPROF_release_counters:
+ ret = -EPERM;
if ( (xenoprof_state == XENOPROF_COUNTERS_RESERVED) ||
(xenoprof_state == XENOPROF_READY) )
{
xenoprof_state = XENOPROF_IDLE;
nmi_release_counters();
nmi_disable_virq();
+ ret = 0;
}
- else
- ret = -EPERM;
break;
case XENOPROF_shutdown:
+ ret = -EPERM;
if ( xenoprof_state == XENOPROF_IDLE )
{
activated = 0;
primary_profiler = NULL;
ret = 0;
}
- else
- ret = -EPERM;
break;
default: